Guild icon
Project Sekai
🔒 UMDCTF 2023 / ❌-ml-french-baguette
Avatar
French Baguette - 500 points
Category: ML. Description: Some French guy made this weird program. It takes an image as input and returns some file. I inputted the flag into the program. You can have the output :) Author: Segal. Files: (attached). Tags: No tags.
Sutx pinned a message to this channel. 04/28/2023 3:01 PM
Avatar
@Violin wants to collaborate 🤝
Avatar
@fleming wants to collaborate 🤝
Avatar
@kanon wants to collaborate 🤝
Avatar
@layka_ wants to collaborate 🤝
09:29
@TheBadGod wants to collaborate 🤝
Avatar
@chenx3n wants to collaborate 🤝
09:41
@hfz wants to collaborate 🤝
Avatar
model struct: struct model { double m1[6][5][5]; double m2[6][16][5][5]; double m3[16][120][5][5]; double m4[120][10]; double b1[6]; double b2[16]; double b3[120]; double b4[10]; }; mx are the weights and bx are the biases
10:01
the [5][5] are just optimizations for block matrix multiplication
10:02
__int64 __fastcall forward(model *model, input_data *input, double (__fastcall *a3)(double)) { // [COLLAPSED LOCAL DECLARATIONS. PRESS KEYPAD CTRL-"+" TO EXPAND] for ( i = 0; !i; i = 1 ) { for ( j = 0; j <= 5; ++j ) { for ( k = 0; k <= 27; ++k ) { for ( m = 0; m <= 27; ++m ) { for ( n = 0; n <= 4; ++n ) { for ( ii = 0; ii <= 4; ++ii ) input->mid_mat[j][k][m] = model->m1[j][n][ii] * input->in_mat[k + n][ii + m] + input->mid_mat[j][k][m]; } } } } } for ( jj = 0; jj <= 5; ++jj ) { for ( kk = 0; kk <= 0x30F; ++kk ) input->mid_mat[jj][0][kk] = a3(input->mid_mat[jj][0][kk] + model->b1[jj]); } for ( mm = 0; mm <= 5; ++mm ) { for ( nn = 0; nn <= 13; ++nn ) { for ( i1 = 0; i1 <= 13; ++i1 ) { v16 = 0; v17 = 0; for ( i2 = 0; i2 < 2; ++i2 ) { for ( i3 = 0; i3 < 2; ++i3 ) { v47 = input->mid_mat[mm][2 * nn + i2][2 * i1 + i3] > input->mid_mat[mm][2 * nn + v16][2 * i1 + v17]; v16 += v47 * (i2 - v16); v17 += v47 * (i3 - v17); } } input->med_mat[mm][nn][i1] = input->mid_mat[mm][2 * nn + v16][2 * i1 + v17]; } } } for ( i4 = 0; i4 <= 5; ++i4 ) { for ( i5 = 0; i5 <= 0xF; ++i5 ) { for ( i6 = 0; i6 <= 9; ++i6 ) { for ( i7 = 0; i7 <= 9; ++i7 ) { for ( i8 = 0; i8 <= 4; ++i8 ) { for ( i9 = 0; i9 <= 4; ++i9 ) input->mad_mat[i5][i6][i7] = model->m2[i4][i5][i8][i9] * input->med_mat[i4][i6 + i8][i7 + i9] + input->mad_mat[i5][i6][i7]; } } } } } for ( i10 = 0; i10 <= 0xF; ++i10 ) { for ( i11 = 0; i11 <= 99; ++i11 ) input->mad_mat[i10][0][i11] = a3(input->mad_mat[i10][0][i11] + model->b2[i10]); } for ( i12 = 0; i12 <= 0xF; ++i12 ) { for ( i13 = 0; i13 <= 4; ++i13 ) { for ( i14 = 0; i14 <= 4; ++i14 ) { v31 = 0; v32 = 0; for ( i15 = 0; i15 < 2; ++i15 ) { for ( i16 = 0; i16 < 2; ++i16 ) { v46 = input->mad_mat[i12][2 * i13 + i15][2 * i14 + i16] > input->mad_mat[i12][2 * i13 + v31][2 * i14 + v32]; v31 += v46 * (i15 - v31); v32 += v46 * (i16 - v32); } } input->mud_mat[i12][i13][i14] = input->mad_mat[i12][2 * i13 + v31][2 * i14 + v32]; } } } for ( i17 = 0; i17 <= 0xF; ++i17 ) { for ( i18 = 0; i18 <= 
0x77; ++i18 ) { for ( i19 = 0; !i19; i19 = 1 ) { for ( i20 = 0; !i20; i20 = 1 ) { for ( i21 = 0; i21 <= 4; ++i21 ) { for ( i22 = 0; i22 <= 4; ++i22 ) input->out_mat[i18] = model->m3[i17][i18][i21][i22] * input->mud_mat[i17][i21][i22] + input->out_mat[i18]; } } } } } for ( i23 = 0; i23 <= 0x77; ++i23 ) { for ( i24 = 0; !i24; i24 = 1 ) input->out_mat[i23] = a3(input->out_mat[i23] + model->b3[i23]); } for ( i25 = 0; i25 <= 0x77; ++i25 ) { for ( i26 = 0; i26 <= 9; ++i26 ) input->final_mat[i26] = model->m4[i25][i26] * input->out_mat[i25] + input->final_mat[i26]; } for ( i27 = 0; ; ++i27 ) { result = i27; if ( i27 > 9 ) break; input->final_mat[i27] = a3(input->final_mat[i27] + model->b_[i27]); } return result; }
10:02
cleaned up multiplication
10:02
input is struct input_data { double in_mat[32][32]; double mid_mat[6][28][28]; double med_mat[6][14][14]; double mad_mat[16][10][10]; double mud_mat[16][5][5]; double out_mat[120]; double final_mat[10]; }; just seem to be different temporary matrices
10:02
a3 is just relu
Avatar
im assuming we can blackbox forward and backward functions cuz they are just matrix multiplications?
Avatar
i think so
10:05
will try to get the struct for the output after dinner (but i would assume it's easily guessable)
Avatar
@hfz do you know if this is reversible? (with above struct) kinda forgot about how backpropagation works lol
10:06
but i think we will have all data and struct just need proper maths for working backwards (edited)
Avatar
I never did it before, but theoretically doable, since backprop is just derivation
10:23
the flag is just a 25x25 image (the file is just raw pixels, no header or anything)
10:23
the pixels get normalized by doing: flag[i][j] = (flag[i][j] - mean) / std
10:23
in load_input
Avatar
output struct looks something like this struct output { double m1[6][5][5]; double m2[6][16][5][5]; double m3[16][120][5][5]; double m4[120][10]; double b1[6]; double b2[16]; double b3[120]; double b4[10]; };
Avatar
Avatar
TheBadGod
output struct looks something like this struct output { double m1[6][5][5]; double m2[6][16][5][5]; double m3[16][120][5][5]; double m4[120][10]; double b1[6]; double b2[16]; double b3[120]; double b4[10]; };
this is the grad.dat file
Avatar
Avatar
TheBadGod
model struct: struct model { double m1[6][5][5]; double m2[6][16][5][5]; double m3[16][120][5][5]; double m4[120][10]; double b1[6]; double b2[16]; double b3[120]; double b4[10]; }; mx are the weights and bx are the biases
this is the model.dat file
Avatar
Avatar
TheBadGod
model struct: struct model { double m1[6][5][5]; double m2[6][16][5][5]; double m3[16][120][5][5]; double m4[120][10]; double b1[6]; double b2[16]; double b3[120]; double b4[10]; }; mx are the weights and bx are the biases
the model starts out as random values in the matrices and all biases set to 0
10:46
but doesn't matter because we get the full model LUL
10:50
the fact that the model/output file match with the sizes of these structs means it can't be too wrong
Avatar
Avatar
hfz
I never did it before, but theoretically doable, since backprop is just derivation
does this mean no training needed - we just need to do maths?
Avatar
ok, so i think the first layer is a convolution layer, then we take the max in each 2x2 square and shrink the matrix from 6x28x28 to 6x14x14. then convolution layer again, half the size by taking the max in each 2x2 square again, convolute with the third matrix, then normal linear layer (the 120x10 matrix)
11:41
so it's not normal matrix mult, the [5][5] matrices are actually convolution kernels
Avatar
for ( i46 = 0; !i46; i46 = 1 ) { for ( i47 = 0; i47 <= 5; ++i47 ) { for ( i48 = 0; i48 <= 4; ++i48 ) { for ( i49 = 0; i49 <= 4; ++i49 ) { for ( i50 = 0; i50 <= 27; ++i50 ) { for ( i51 = 0; i51 <= 27; ++i51 ) grad->m1[i47][i48][i49] = target->m1[i47][i50][i51] * input->in_mat[i48 + i50][i51 + i49] + grad->m1[i47][i48][i49]; } } } } } we know grad->m1 we also know that in in_mat the first two rows and columns, as well as the last row/column are zero (due to the way we initialize it)
12:37
idk enough math, but maybe we can get the original in_mat with this
12:39
actually since it's 32x32 matrices it's more than just the last row/column, but still same concept
Avatar
in theory z3 can do this again
12:43
like 2nd ML
Avatar
im not gonna implement everything in z3 lol
Avatar
yeah lol
Avatar
i actually started, but it's slow and it crashes
2.94 KB
Avatar
oh i think i know why, indexing wrong into the numpy array
12:45
nvm
12:45
still getting a parser error
Avatar
input_matrix = [[Real(f"i_{i}_{j}") for j in range(32)] for i in range(32)] input values are all 0-256 values right?
Avatar
0-256?
12:46
it does (value - mean) / sigma on all values
12:47
so the values in in_mat should be fairly close to [-1,1] (edited)
Avatar
oh true
Avatar
but im currently trying to figure out a way to get m1, then I should be able to undo the convolution, because i know the first two columns/rows are 0
Avatar
RealVal and Real are the same? author solve script in ML chal 2 used RealVal
Avatar
Avatar
sahuang
RealVal and Real are the same? author solve script in ML chal 2 used RealVal
Real is a symbolic variable, RealVal is a concrete value, no?
12:49
like bitvec and bitvecval
Avatar
oh right
Avatar
for ( i44 = 0; i44 <= 5; ++i44 ) { for ( i45 = 0; i45 <= 0x30F; ++i45 ) grad->b1[i44] += target->m1[i44][0][i45]; } this allows us to get the sum of the 6 different 28x28 kernels; which might be enough?
12:52
nvm
12:52
flipped dimensions
Avatar
@unpickled admin bot wants to collaborate 🤝
Exported 59 message(s)